From b10564cbaa8fe68ea5817f62b7c3c3e00c057eeb Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Thu, 16 Aug 2007 09:37:54 -0600 Subject: [PATCH] [IA64] Shrink vtlb size Instrumenting thash_purge_all() shows a very low usage of vtlb entries (21 at most). This patch shrinks the default vtlb size from 512KB to 16KB to optimize memory. This also speeds up ptc_e emulation. The hash function was also improved; the frequency of collisions never changed and there is no performance degradation. Signed-off-by: Kouya Shimura --- xen/arch/ia64/vmx/vmmu.c | 38 +++---------------------------------- xen/arch/ia64/vmx/vtlb.c | 17 ++++++++++++++--- xen/include/asm-ia64/vmmu.h | 2 +- 3 files changed, 18 insertions(+), 39 deletions(-) diff --git a/xen/arch/ia64/vmx/vmmu.c b/xen/arch/ia64/vmx/vmmu.c index b1eec86354..8ab640f542 100644 --- a/xen/arch/ia64/vmx/vmmu.c +++ b/xen/arch/ia64/vmx/vmmu.c @@ -32,9 +32,9 @@ static void __init parse_vtlb_size(char *s) if (sz > 0) { default_vtlb_sz = fls(sz - 1); - /* minimum 256KB (since calculated tag might be broken) */ - if (default_vtlb_sz < 18) - default_vtlb_sz = 18; + /* minimum 16KB (for tag uniqueness) */ + if (default_vtlb_sz < 14) + default_vtlb_sz = 14; } } @@ -240,40 +240,8 @@ void machine_tlb_insert(struct vcpu *v, thash_data_t *tlb) */ void machine_tlb_purge(u64 va, u64 ps) { -// u64 psr; -// psr = ia64_clear_ic(); ia64_ptcl(va, ps << 2); -// ia64_set_psr(psr); -// ia64_srlz_i(); -// return; } -/* -u64 machine_thash(u64 va) -{ - return ia64_thash(va); -} - -u64 machine_ttag(u64 va) -{ - return ia64_ttag(va); -} -*/ -thash_data_t * vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag) -{ - u64 index,pfn,rid,pfn_bits; - pfn_bits = vpta.size-5-8; - pfn = REGION_OFFSET(va)>>_REGION_PAGE_SIZE(vrr); - rid = _REGION_ID(vrr); - index = ((rid&0xff)<>8)&0xffff) | ((pfn >>pfn_bits)<<16); - return (thash_data_t *)((vpta.base<> _REGION_PAGE_SIZE(vrr); + rid = _REGION_ID(vrr); + index = (pfn ^ rid) & ((1UL << (vpta.size - 5)) - 1); + *tag = pfn ^ (rid << 39); + 
return (thash_data_t *)((vpta.base << PTA_BASE_SHIFT) + (index << 5)); +} + /* * purge software guest tlb */ @@ -308,7 +319,7 @@ static void vtlb_purge(VCPU *v, u64 va, u64 ps) size = PSIZE(rr_ps); vrr.ps = rr_ps; while (num) { - cur = vsa_thash(hcb->pta, curadr, vrr.rrval, &tag); + cur = vtlb_thash(hcb->pta, curadr, vrr.rrval, &tag); while (cur) { if (cur->etag == tag && cur->ps == rr_ps) cur->etag = 1UL << 63; @@ -401,7 +412,7 @@ void vtlb_insert(VCPU *v, u64 pte, u64 itir, u64 va) vcpu_get_rr(v, va, &vrr.rrval); vrr.ps = itir_ps(itir); VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps); - hash_table = vsa_thash(hcb->pta, va, vrr.rrval, &tag); + hash_table = vtlb_thash(hcb->pta, va, vrr.rrval, &tag); cch = hash_table; while (cch) { if (INVALID_TLB(cch)) { @@ -639,7 +650,7 @@ thash_data_t *vtlb_lookup(VCPU *v, u64 va,int is_data) ps = __ffs(psbits); psbits &= ~(1UL << ps); vrr.ps = ps; - cch = vsa_thash(hcb->pta, va, vrr.rrval, &tag); + cch = vtlb_thash(hcb->pta, va, vrr.rrval, &tag); do { if (cch->etag == tag && cch->ps == ps) return cch; diff --git a/xen/include/asm-ia64/vmmu.h b/xen/include/asm-ia64/vmmu.h index 3df4258b16..2b4ccfc6db 100644 --- a/xen/include/asm-ia64/vmmu.h +++ b/xen/include/asm-ia64/vmmu.h @@ -24,7 +24,7 @@ #define XEN_TLBthash_H #define MAX_CCN_DEPTH (15) // collision chain depth -#define DEFAULT_VTLB_SZ (19) // 512K hash + 512K c-chain for VTLB +#define DEFAULT_VTLB_SZ (14) // 16K hash + 16K c-chain for VTLB #define DEFAULT_VHPT_SZ (23) // 8M hash + 8M c-chain for VHPT #define VTLB(v,_x) (v->arch.vtlb._x) #define VHPT(v,_x) (v->arch.vhpt._x) -- 2.30.2